--- title: Title keywords: fastai sidebar: home_sidebar summary: "summary" description: "summary" ---
{% raw %}

Train LVQuan19 for LV phase classification

%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai import *
from fastai.vision import *
from fastai.data_block import *
import scipy.io as sio
from fastai.callbacks import *
from mrimage_update import *

# path = Path('/home/projects/data/lvquan/')
path = Path('/media/ismael/01D2273CA76327A0/datasets/LVQuan20xx/2019/')
(path/'train/').ls()[:5]
[PosixPath('/media/ismael/01D2273CA76327A0/datasets/LVQuan20xx/2019/train/patient25.mat'),
 PosixPath('/media/ismael/01D2273CA76327A0/datasets/LVQuan20xx/2019/train/patient41.mat'),
 PosixPath('/media/ismael/01D2273CA76327A0/datasets/LVQuan20xx/2019/train/models'),
 PosixPath('/media/ismael/01D2273CA76327A0/datasets/LVQuan20xx/2019/train/patient1.mat'),
 PosixPath('/media/ismael/01D2273CA76327A0/datasets/LVQuan20xx/2019/train/patient10.mat')]
def phase_labeler(fn):
    """Return the per-slice phase labels for one patient's .mat file.

    Reads the `lv_phase` vector (0/1 per slice) and maps it to the
    class names 'diastolic' (0) and 'systolic' (1).
    """
    phase_names = np.array(['diastolic', 'systolic'])
    lv_phase = sio.loadmat(fn)['lv_phase'][0]
    return phase_names[lv_phase]
src_data = (MRImageCtxList.from_folder(path/'train/', extensions=['.mat']))
                         
# defaults.device = 'cpu'
tfms = get_transforms()
classes=np.array(['diastolic','systolic'])
src_data = (MRImageCtxList.from_folder(path/'train/', extensions=['.mat'])
                         .split_by_rand_pct(seed=42)
                         .label_from_func(phase_labeler, label_cls=MRMultiCategoryList)
                         .transform(tfms, size=256, slicewise=False)
           )
                         
data = src_data.databunch(bs=1)
data.train_ds.y
MRMultiCategoryList (45 items)
[1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0]; 0 = diastole, 1 = systole.,[0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0]; 0 = diastole, 1 = systole.,[1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 1]; 0 = diastole, 1 = systole.,[0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0]; 0 = diastole, 1 = systole.,[1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0]; 0 = diastole, 1 = systole.
Path: /media/ismael/01D2273CA76327A0/datasets/LVQuan20xx/2019/train
data = src_data.databunch(bs=1)
data.normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet34)
model = nn.Sequential(Lambda(mri_to_images), learn.model,
                      LambdaWithArgs(out_to_mri_out, n_slices=20))
/home/ismael/Utils/miniconda2/envs/fastai/lib/python3.6/site-packages/torch/cuda/__init__.py:117: UserWarning: 
    Found GPU0 GF117 which is of cuda capability 2.1.
    PyTorch no longer supports this GPU because it is too old.
    
  warnings.warn(old_gpu_warn % (d, name, major, capability[1]))
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-8-7762905e7bce> in <module>
      1 data = src_data.databunch(bs=1)
      2 data.normalize(imagenet_stats)
----> 3 learn = cnn_learner(data, models.resnet34)
      4 model = nn.Sequential(Lambda(mri_to_images), learn.model,
      5                       LambdaWithArgs(out_to_mri_out, n_slices=20))

~/projects/fastai-1.0/course-v3/nbs/perso/fastai/vision/learner.py in cnn_learner(data, base_arch, cut, pretrained, lin_ftrs, ps, custom_head, split_on, bn_final, init, concat_pool, **kwargs)
     99     learn.split(split_on or meta['split'])
    100     if pretrained: learn.freeze()
--> 101     if init: apply_init(model[1], nn.init.kaiming_normal_)
    102     return learn
    103 

~/projects/fastai-1.0/course-v3/nbs/perso/fastai/torch_core.py in apply_init(m, init_func)
    254 def apply_init(m, init_func:LayerFunc):
    255     "Initialize all non-batchnorm layers of `m` with `init_func`."
--> 256     apply_leaf(m, partial(cond_init, init_func=init_func))
    257 
    258 def in_channels(m:nn.Module) -> List[int]:

~/projects/fastai-1.0/course-v3/nbs/perso/fastai/torch_core.py in apply_leaf(m, f)
    250     c = children(m)
    251     if isinstance(m, nn.Module): f(m)
--> 252     for l in c: apply_leaf(l,f)
    253 
    254 def apply_init(m, init_func:LayerFunc):

~/projects/fastai-1.0/course-v3/nbs/perso/fastai/torch_core.py in apply_leaf(m, f)
    249     "Apply `f` to children of `m`."
    250     c = children(m)
--> 251     if isinstance(m, nn.Module): f(m)
    252     for l in c: apply_leaf(l,f)
    253 

~/projects/fastai-1.0/course-v3/nbs/perso/fastai/torch_core.py in cond_init(m, init_func)
    244 def cond_init(m:nn.Module, init_func:LayerFunc):
    245     "Initialize the non-batchnorm layers of `m` with `init_func`."
--> 246     if (not isinstance(m, bn_types)) and requires_grad(m): init_default(m, init_func)
    247 
    248 def apply_leaf(m:nn.Module, f:LayerFunc):

~/projects/fastai-1.0/course-v3/nbs/perso/fastai/torch_core.py in init_default(m, func)
    239     if func:
    240         if hasattr(m, 'weight'): func(m.weight)
--> 241         if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
    242     return m
    243 

RuntimeError: cuda runtime error (48) : no kernel image is available for execution on the device at /pytorch/aten/src/THC/generic/THCTensorMath.cu:14
# data.normalize(imagenet_stats)
data.show_batch(rows=1)
learn.model = model
learn.opt_func
functools.partial(<class 'torch.optim.adam.Adam'>, betas=(0.9, 0.99))
lr_find(learn)
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
learn.recorder.plot()
x, y = data.one_batch(cpu=False)
res = model(x)
x.shape
torch.Size([5, 20, 3, 256, 256])
y.shape, res.shape
(torch.Size([5, 20]), torch.Size([5, 20, 2]))
learn.metrics += [accuracy]
lr = 1e-2
# learn.metrics[-1] = [accuracy]
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='exp1ctx-1')
fit_one_cycle(learn, 20, lr, callbacks=[best_cb])
epoch train_loss valid_loss accuracy time
0 0.974523 0.732507 0.563636 00:08
1 0.878615 0.782789 0.618182 00:08
2 0.806672 1.347975 0.640909 00:08
3 0.755294 1.009939 0.650000 00:09
4 0.691939 0.476657 0.736364 00:09
5 0.640142 0.377402 0.831818 00:08
6 0.594162 0.758759 0.650000 00:09
7 0.556411 0.483480 0.777273 00:08
8 0.531126 0.463462 0.763636 00:08
9 0.495456 0.593302 0.768182 00:09
10 0.470048 0.386198 0.795455 00:09
11 0.442573 0.409307 0.813636 00:08
12 0.424817 0.876453 0.745455 00:09
13 0.410044 0.365433 0.831818 00:09
14 0.393739 0.361335 0.836364 00:10
15 0.374126 0.368583 0.836364 00:09
16 0.360521 0.360496 0.836364 00:09
17 0.349396 0.405443 0.818182 00:08
18 0.342363 0.401246 0.818182 00:08
19 0.336365 0.382929 0.827273 00:08
Better model found at epoch 0 with accuracy value: 0.5636363625526428.
Better model found at epoch 1 with accuracy value: 0.6181817650794983.
Better model found at epoch 2 with accuracy value: 0.6409091353416443.
Better model found at epoch 3 with accuracy value: 0.6499999761581421.
Better model found at epoch 4 with accuracy value: 0.7363636493682861.
Better model found at epoch 5 with accuracy value: 0.831818163394928.
Better model found at epoch 14 with accuracy value: 0.8363635540008545.
Better model found at epoch 15 with accuracy value: 0.8363636136054993.
preds, y = learn.get_preds()
predss = preds.argmax(dim=-1)
i = 10
predss[i], y[i]
(tensor([1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
 tensor([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
# learn.save('exp1-1')
learn.unfreeze()
lr_find(learn)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='exp1ctx-2')
fit_one_cycle(learn, 20, slice(8e-6, 2e-4), callbacks=[best_cb])
epoch train_loss valid_loss accuracy time
0 0.314285 0.349609 0.831818 00:08
1 0.304329 0.385123 0.836364 00:09
2 0.303301 0.382364 0.836364 00:08
3 0.297246 0.413202 0.822727 00:08
4 0.297811 0.503208 0.790909 00:09
5 0.291843 0.385320 0.836364 00:08
6 0.294059 0.520180 0.800000 00:08
7 0.288741 0.358447 0.840909 00:08
8 0.279464 0.395215 0.827273 00:08
9 0.275306 0.383250 0.818182 00:09
10 0.276753 0.438050 0.827273 00:08
11 0.278600 0.431818 0.818182 00:09
12 0.270339 0.457298 0.818182 00:09
13 0.267388 0.474568 0.809091 00:08
14 0.260513 0.405418 0.836364 00:08
15 0.253430 0.437269 0.831818 00:09
16 0.248520 0.435407 0.827273 00:08
17 0.246659 0.412618 0.818182 00:09
18 0.242954 0.409906 0.827273 00:09
19 0.239107 0.430372 0.822727 00:09
Better model found at epoch 0 with accuracy value: 0.831818163394928.
Better model found at epoch 1 with accuracy value: 0.8363635540008545.
Better model found at epoch 2 with accuracy value: 0.8363636136054993.
Better model found at epoch 7 with accuracy value: 0.8409090042114258.
preds, y = learn.get_preds()
predss = preds.argmax(dim=-1)
data.valid_ds.x[2].show()

The worst results (0, i.e. diastole, predicted for all slices) are obtained on MR images that are not very clear, like the one above. These are:

1. '/home/projects/data/lvquan/train/patient50.mat',

2. '/home/projects/data/lvquan/train/patient51.mat'

3. '/home/projects/data/lvquan/train/patient49.mat'

Their indexes in the validation set are: 2, 6, 8

If we remove them from the validation set, the accuracy jumps from 84% to 93.75%!

So if we could preprocess the dataset images to avoid that, we would win!

idxs = [0, 1, *range(3,6), 7,9,10]
(predss[idxs] == y[idxs]).float().mean()
tensor(0.9375)
data.valid_ds.x.items[[2, 6, 8]]
array([PosixPath('/home/projects/data/lvquan/train/patient50.mat'),
       PosixPath('/home/projects/data/lvquan/train/patient51.mat'),
       PosixPath('/home/projects/data/lvquan/train/patient49.mat')], dtype=object)
# worst result: i = 2, 6, 8
# im
data.valid_ds.x[10].show()
def mri_to_images(mri:Tensor)->Tensor:
    """Flatten a batch of MRI volumes into a flat stack of images.

    Concatenates the per-volume slices along the batch dimension, i.e.
    an input of shape (bs, n_slices, ...) becomes (bs * n_slices, ...),
    so a 2D CNN can process every slice as an independent image.
    """
    volumes = [volume for volume in mri]
    return torch.cat(volumes)

def out_to_mri_out(cats:Tensor, n_slices:int=None)->Tensor:
    """Regroup flat per-image outputs back into per-volume outputs.

    Inverse of `mri_to_images`: reshapes (bs * n_slices, ...) into
    (bs, n_slices, ...). `n_slices` must be given explicitly because
    the flat batch alone does not determine the grouping.
    """
    # `is not None` (identity), not `!= None` — the Pythonic None check.
    assert n_slices is not None, 'Must indicate n_slices'
    return cats.reshape(-1, n_slices, *cats.shape[1:])
# learn.unfreeze()
# learn.load('exp1ctx-2')

No evolution; best performance: 0.6863 when unfrozen and 0.6818 when frozen.

Conclusion: Train from scratch without pretraining.

data = src_data.databunch(bs=5)
data.normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet34)
model = nn.Sequential(Lambda(mri_to_images), learn.model,
                      LambdaWithArgs(out_to_mri_out, n_slices=20))
learn.model = model
wd=1e-2
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
learn.metrics += [accuracy]
lr = 1e-2
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_stats-1')
fit_one_cycle(learn, 30, lr, callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.930421 0.767004 0.631818 00:09
1 0.838768 0.600057 0.672727 00:09
2 0.789922 0.602624 0.718182 00:09
3 0.744088 0.533656 0.763636 00:09
4 0.702356 0.992425 0.704545 00:09
5 0.659396 0.566185 0.777273 00:09
6 0.623018 0.915126 0.700000 00:10
7 0.582000 0.534688 0.768182 00:09
8 0.541101 0.386145 0.795455 00:08
9 0.514533 0.552663 0.827273 00:10
10 0.493357 0.744096 0.786364 00:09
11 0.476920 1.243897 0.731818 00:08
12 0.464580 0.439491 0.818182 00:10
13 0.448405 0.647823 0.777273 00:10
14 0.432937 0.788773 0.754545 00:09
15 0.413426 0.370244 0.850000 00:09
16 0.399163 0.494581 0.818182 00:10
17 0.388929 0.590883 0.795455 00:09
18 0.384024 0.349213 0.809091 00:08
19 0.366024 0.379484 0.831818 00:08
20 0.356770 0.599610 0.786364 00:10
21 0.346438 0.367442 0.813636 00:08
22 0.333777 0.595328 0.786364 00:08
23 0.324059 0.495034 0.809091 00:10
24 0.314989 0.539075 0.813636 00:09
25 0.306998 0.432428 0.813636 00:10
26 0.302356 0.508339 0.818182 00:08
27 0.300573 0.430218 0.831818 00:08
28 0.296672 0.467688 0.818182 00:08
29 0.293188 0.461825 0.822727 00:09
Better model found at epoch 0 with accuracy value: 0.6318182349205017.
Better model found at epoch 1 with accuracy value: 0.6727272868156433.
Better model found at epoch 2 with accuracy value: 0.718181848526001.
Better model found at epoch 3 with accuracy value: 0.7636363506317139.
Better model found at epoch 5 with accuracy value: 0.7772727608680725.
Better model found at epoch 8 with accuracy value: 0.7954545617103577.
Better model found at epoch 9 with accuracy value: 0.8272726535797119.
Better model found at epoch 15 with accuracy value: 0.8500000238418579.
preds, y = learn.get_preds()
predss = preds.argmax(dim=-1)
i = 10
predss[i], y[i]
(tensor([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0]),
 tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
learn.unfreeze()
wd=1e-2
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_stats-2')
fit_one_cycle(learn, 30, slice(1e-6, lr/100), callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.317177 0.417926 0.840909 00:10
1 0.341312 0.437553 0.813636 00:09
2 0.340406 0.465251 0.822727 00:09
3 0.331247 0.407472 0.809091 00:08
4 0.318173 0.419220 0.809091 00:10
5 0.316297 0.709020 0.795454 00:08
6 0.311226 0.615763 0.786364 00:09
7 0.305890 0.497833 0.790909 00:08
8 0.305943 0.467187 0.795454 00:11
9 0.305876 0.418814 0.827273 00:09
10 0.304714 0.477928 0.840909 00:09
11 0.301679 0.433324 0.827273 00:08
12 0.303777 0.487931 0.804545 00:09
13 0.300871 0.513591 0.804545 00:09
14 0.297309 0.463843 0.818182 00:08
15 0.295167 0.521748 0.818182 00:10
16 0.296124 0.488977 0.818182 00:10
17 0.290052 0.416330 0.836364 00:08
18 0.287267 0.455007 0.836364 00:10
19 0.283121 0.452587 0.818182 00:09
20 0.278438 0.474804 0.813636 00:09
21 0.273793 0.453716 0.831818 00:09
22 0.274377 0.503145 0.809091 00:09
23 0.275156 0.536394 0.795455 00:10
24 0.272448 0.461960 0.827273 00:10
25 0.269961 0.436198 0.818182 00:09
26 0.269560 0.431969 0.845454 00:09
27 0.272012 0.453289 0.831818 00:09
28 0.271766 0.460909 0.818182 00:10
29 0.271668 0.452644 0.831818 00:09
Better model found at epoch 0 with accuracy value: 0.8409090638160706.
Better model found at epoch 26 with accuracy value: 0.8454544544219971.
wd=1e-1
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_stats-2')
fit_one_cycle(learn, 30, slice(1e-5, 1e-4), callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.265417 0.446858 0.836364 00:08
1 0.264575 0.528115 0.809091 00:10
2 0.272519 0.517240 0.786364 00:09
3 0.271309 0.476080 0.827273 00:08
4 0.265328 0.364488 0.850000 00:08
5 0.263522 0.356034 0.854545 00:09
6 0.261111 0.421043 0.831818 00:09
7 0.254602 0.450673 0.809091 00:09
8 0.252944 0.400875 0.831818 00:09
9 0.248829 0.406898 0.831818 00:09
10 0.245730 0.423253 0.822727 00:08
11 0.243676 0.410849 0.818182 00:10
12 0.240931 0.459890 0.827273 00:08
13 0.240476 0.768915 0.768182 00:09
14 0.239307 0.447986 0.831818 00:09
15 0.239242 0.474869 0.836364 00:09
16 0.237251 0.533719 0.813636 00:09
17 0.236587 0.635528 0.790909 00:08
18 0.231179 0.405255 0.840909 00:09
19 0.229297 0.444224 0.836364 00:10
20 0.228451 0.493451 0.818182 00:09
21 0.226499 0.416784 0.840909 00:08
22 0.228549 0.506148 0.813636 00:10
23 0.226034 0.421691 0.836364 00:08
24 0.224947 0.449034 0.827273 00:08
25 0.222134 0.483486 0.827273 00:09
26 0.220777 0.454777 0.827273 00:08
27 0.217249 0.468458 0.827273 00:08
28 0.213253 0.471361 0.822727 00:10
29 0.210052 0.434502 0.840909 00:09
Better model found at epoch 0 with accuracy value: 0.8363637328147888.
Better model found at epoch 4 with accuracy value: 0.8499999642372131.
Better model found at epoch 5 with accuracy value: 0.8545454144477844.
wd=0.2
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.

Conclusion: pretrained is better, as training from scratch is very dumb — it just predicts 0 for everything. But perhaps it needs more training.

learn.export('phase-2.pkl')
learn.load('phase_stats-1')
learn.export('phase-1.pkl')
data = src_data.databunch(bs=5)
data.normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet34)
model = nn.Sequential(Lambda(mri_to_images), learn.model,
                      LambdaWithArgs(out_to_mri_out, n_slices=20))

learn.model = model
partial(optim.SGD, momentum=0.9)
functools.partial(<class 'torch.optim.sgd.SGD'>, momentum=0.9)
optim.SGD()
learn.opt_func = partial(optim.SGD, momentum=0.9)
wd=0.01
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
# learn.metrics += [accuracy]
learn.metrics += [accuracy]
lr = 5e-3
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_sgd-1')
fit_one_cycle(learn, 30, lr, callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.965183 0.669079 0.600000 00:07
1 0.931890 0.649723 0.604545 00:07
2 0.861996 0.594394 0.736364 00:07
3 0.785626 0.491451 0.777273 00:08
4 0.753985 0.515178 0.750000 00:07
5 0.704186 0.649869 0.700000 00:07
6 0.663989 0.592114 0.754545 00:07
7 0.640176 0.377733 0.840909 00:08
8 0.606677 0.433227 0.804545 00:07
9 0.565788 0.463238 0.818182 00:08
10 0.543040 0.407180 0.831818 00:07
11 0.526449 0.573741 0.763636 00:07
12 0.504139 0.463781 0.818182 00:08
13 0.479371 0.484795 0.813636 00:07
14 0.458106 0.416829 0.813636 00:08
15 0.439326 0.422663 0.804545 00:07
16 0.427131 0.473797 0.804545 00:08
17 0.409432 0.443076 0.827273 00:09
18 0.398232 0.441818 0.818182 00:07
19 0.387822 0.535741 0.795455 00:08
20 0.378867 0.590298 0.790909 00:07
21 0.368127 0.479064 0.800000 00:08
22 0.358833 0.452717 0.845455 00:08
23 0.359187 0.460005 0.845454 00:08
24 0.349907 0.452157 0.859091 00:07
25 0.341969 0.448260 0.827273 00:07
26 0.337457 0.437806 0.850000 00:08
27 0.335118 0.445756 0.827273 00:07
28 0.330179 0.458706 0.822727 00:08
29 0.324756 0.483585 0.818182 00:08
Better model found at epoch 0 with accuracy value: 0.5999999642372131.
Better model found at epoch 1 with accuracy value: 0.6045454144477844.
Better model found at epoch 2 with accuracy value: 0.7363635897636414.
Better model found at epoch 3 with accuracy value: 0.7772727608680725.
Better model found at epoch 7 with accuracy value: 0.8409090638160706.
Better model found at epoch 22 with accuracy value: 0.8454545736312866.
Better model found at epoch 24 with accuracy value: 0.8590908646583557.
preds, y = learn.get_preds()
predss = preds.argmax(2)
preds.shape
torch.Size([11, 20, 2])
i= 10
predss[i], y[i]
(tensor([0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
 tensor([1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
data.valid_ds.x[8].show()
learn.unfreeze()
wd=0.001
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_sgd-2')
fit_one_cycle(learn, 30, slice(1e-5, lr/10), callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.312551 0.465494 0.840909 00:08
1 0.298222 0.453528 0.836364 00:07
2 0.297517 0.441874 0.840909 00:08
3 0.307497 0.454882 0.836364 00:10
4 0.310347 0.470233 0.831818 00:08
5 0.305403 0.453780 0.831818 00:08
6 0.305366 0.482069 0.813636 00:08
7 0.304539 0.449339 0.822727 00:08
8 0.302603 0.453574 0.818182 00:07
9 0.299686 0.457589 0.813636 00:08
10 0.298567 0.453648 0.836364 00:07
11 0.300892 0.456291 0.818182 00:08
12 0.298294 0.445710 0.827273 00:08
13 0.301088 0.438862 0.850000 00:08
14 0.299406 0.441143 0.818182 00:10
15 0.297602 0.453236 0.822727 00:07
16 0.297709 0.479425 0.813636 00:08
17 0.297044 0.471741 0.813636 00:07
18 0.295723 0.457071 0.809091 00:08
19 0.296594 0.451416 0.813636 00:09
20 0.292798 0.446230 0.822727 00:08
21 0.294280 0.459302 0.827273 00:08
22 0.290716 0.439690 0.822727 00:07
23 0.287146 0.464021 0.813636 00:08
24 0.292491 0.464290 0.836364 00:08
25 0.298930 0.478852 0.827273 00:08
26 0.297273 0.454896 0.822727 00:08
27 0.301874 0.465174 0.809091 00:08
28 0.299375 0.442397 0.840909 00:08
29 0.304588 0.479527 0.840909 00:07
Better model found at epoch 0 with accuracy value: 0.8409090638160706.
Better model found at epoch 13 with accuracy value: 0.8500000238418579.
learn = cnn_learner(data, models.resnet34)
model = nn.Sequential(Lambda(mri_to_images), learn.model,
                      LambdaWithArgs(out_to_mri_out, n_slices=20))

learn.model = model
learn.opt_func = partial(optim.RMSprop, momentum=0.9) 
wd=0.01
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
learn.metrics +=[accuracy]
lr = 1e-2
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_rmsprop-1')
fit_one_cycle(learn, 30, lr, callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.929912 0.657682 0.654545 00:08
1 0.879391 0.608939 0.686364 00:07
2 0.813943 0.610716 0.704545 00:07
3 0.770704 0.612759 0.795455 00:08
4 0.709263 0.609986 0.731818 00:07
5 0.682759 0.355698 0.863636 00:08
6 0.627456 0.472071 0.850000 00:08
7 0.582879 0.330105 0.895454 00:07
8 0.551425 0.834462 0.713636 00:08
9 0.518980 0.478296 0.845455 00:08
10 0.495243 0.471508 0.854545 00:07
11 0.472240 0.336946 0.877273 00:08
12 0.463400 0.338160 0.859091 00:08
13 0.454929 1.349159 0.681818 00:09
14 0.446809 0.314912 0.900000 00:08
15 0.440028 0.422030 0.881818 00:10
16 0.429378 0.469776 0.845455 00:07
17 0.412136 0.632332 0.818182 00:08
18 0.405085 0.410414 0.863636 00:08
19 0.394209 0.409117 0.836364 00:07
20 0.379787 0.398052 0.836364 00:08
21 0.363970 0.386266 0.840909 00:08
22 0.349338 0.397053 0.840909 00:08
23 0.340763 0.371366 0.895455 00:07
24 0.327725 0.366617 0.859091 00:07
25 0.314838 0.356419 0.840909 00:08
26 0.307298 0.352536 0.845455 00:08
27 0.300112 0.382962 0.890909 00:08
28 0.298599 0.352357 0.868182 00:07
29 0.292772 0.356701 0.877273 00:07
Better model found at epoch 0 with accuracy value: 0.6545454263687134.
Better model found at epoch 1 with accuracy value: 0.6863635778427124.
Better model found at epoch 2 with accuracy value: 0.7045454382896423.
Better model found at epoch 3 with accuracy value: 0.7954545617103577.
Better model found at epoch 5 with accuracy value: 0.8636362552642822.
Better model found at epoch 7 with accuracy value: 0.895454466342926.
Better model found at epoch 14 with accuracy value: 0.8999999761581421.
preds, y = learn.get_preds()
predss = preds.argmax(2)
i = 5
y[i], predss[i]
(tensor([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
 tensor([0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
data.train_ds.x[-4].show()
data.train_ds.x[-4].data.shape
torch.Size([20, 3, 512, 512])
res = learn.model(data.train_ds.x[-4].data.unsqueeze(0).cuda())
res.argmax(-1)
tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
       device='cuda:0')
data.train_ds.x[-4].data.shape
torch.Size([20, 3, 512, 512])
learn.unfreeze()
wd=0.01
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_rmsprop-2')
fit_one_cycle(learn, 30, slice(2e-6, 2e-5), callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.365323 0.360041 0.886364 00:08
1 0.346966 0.361458 0.872727 00:08
2 0.347090 0.422912 0.881818 00:07
3 0.342034 0.444895 0.886364 00:08
4 0.337493 0.412455 0.881818 00:09
5 0.331320 0.406654 0.859091 00:09
6 0.325813 0.271264 0.909091 00:08
7 0.315888 0.308466 0.895454 00:08
8 0.313611 0.817884 0.790909 00:07
9 0.308619 0.373203 0.863636 00:09
10 0.308870 0.359644 0.895454 00:07
11 0.301439 0.398671 0.890909 00:07
12 0.292443 0.376759 0.881818 00:07
13 0.278640 0.345033 0.886364 00:09
14 0.271854 0.391786 0.868182 00:08
15 0.267138 0.373891 0.886364 00:10
16 0.257721 0.383098 0.881818 00:08
17 0.252187 0.385753 0.890909 00:08
18 0.248199 0.411318 0.890909 00:08
19 0.244604 0.382835 0.886364 00:10
20 0.236603 0.384936 0.881818 00:09
21 0.237243 0.410686 0.872727 00:09
22 0.229476 0.402310 0.877273 00:08
23 0.231591 0.393613 0.886364 00:08
24 0.227886 0.393623 0.890909 00:08
25 0.226722 0.382005 0.886364 00:09
26 0.226782 0.382769 0.886364 00:08
27 0.221231 0.380578 0.886364 00:08
28 0.220379 0.382450 0.886364 00:08
29 0.215739 0.387030 0.890909 00:08
Better model found at epoch 0 with accuracy value: 0.8863635659217834.
Better model found at epoch 6 with accuracy value: 0.9090908169746399.
src_fdata = (MRImageCtxList.from_folder(path/'train/', extensions=['.mat'])
                         .split_none()
                         .label_from_func(phase_labeler, label_cls=MRMultiCategoryList)
                         .transform(tfms, size=256, slicewise=False)
           )
               
fdata = src_fdata.databunch(bs=5)
fdata.normalize(imagenet_stats)
learn = cnn_learner(fdata, models.resnet34)
model = nn.Sequential(Lambda(mri_to_images), learn.model,
                      LambdaWithArgs(out_to_mri_out, n_slices=20))

learn.model = model
learn.opt_func = partial(optim.RMSprop, momentum=0.9) 
learn.load('phase_rmsprop-2')
# wd=1e-2
# lr_find(learn, wd=wd)
learn.recorder.plot()
lr = 4e-3
fit_one_cycle(learn, 30, lr)
epoch train_loss valid_loss time
0 0.310502 #na# 00:08
1 0.296127 #na# 00:07
2 0.285437 #na# 00:08
3 0.280094 #na# 00:09
4 0.283436 #na# 00:08
5 0.295192 #na# 00:07
6 0.289730 #na# 00:07
7 0.303605 #na# 00:07
8 0.308974 #na# 00:08
9 0.316155 #na# 00:07
10 0.311299 #na# 00:07
11 0.304900 #na# 00:07
12 0.315523 #na# 00:08
13 0.309634 #na# 00:08
14 0.307069 #na# 00:08
15 0.309607 #na# 00:08
16 0.305698 #na# 00:07
17 0.297323 #na# 00:07
18 0.289883 #na# 00:08
19 0.278478 #na# 00:07
20 0.275391 #na# 00:07
21 0.267840 #na# 00:08
22 0.259714 #na# 00:08
23 0.248616 #na# 00:07
24 0.241637 #na# 00:07
25 0.230520 #na# 00:07
26 0.225805 #na# 00:07
27 0.222440 #na# 00:07
28 0.214280 #na# 00:08
29 0.217091 #na# 00:07
learn.save('phase_final-1')
learn.load('phase_final-1')
learn.unfreeze()
wd=1e-3
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
fit_one_cycle(learn, 30, slice(1e-6, 8e-5))
epoch train_loss valid_loss time
0 0.190980 #na# 00:09
1 0.201186 #na# 00:07
2 0.215147 #na# 00:09
3 0.202030 #na# 00:09
4 0.204892 #na# 00:08
5 0.215241 #na# 00:09
6 0.223555 #na# 00:07
7 0.220702 #na# 00:07
8 0.213206 #na# 00:08
9 0.210341 #na# 00:08
10 0.201058 #na# 00:08
11 0.199316 #na# 00:08
12 0.193163 #na# 00:08
13 0.186434 #na# 00:07
14 0.182113 #na# 00:07
15 0.180186 #na# 00:07
16 0.177955 #na# 00:08
17 0.175117 #na# 00:07
18 0.172618 #na# 00:08
19 0.174215 #na# 00:09
20 0.170173 #na# 00:08
21 0.166075 #na# 00:08
22 0.163283 #na# 00:09
23 0.165456 #na# 00:08
24 0.168135 #na# 00:07
25 0.164509 #na# 00:08
26 0.160820 #na# 00:09
27 0.155385 #na# 00:07
28 0.153508 #na# 00:08
29 0.148125 #na# 00:08
learn.load('phase_final-2')
learn.export('pfinal2.pkl')
learn = cnn_learner(data, models.resnet34)
model = nn.Sequential(Lambda(mri_to_images), learn.model,
                      LambdaWithArgs(out_to_mri_out, n_slices=20))

learn.model = model
wd=0.01
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
learn.metrics += [accuracy]
lr=1e-2
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_adam-1')
fit_one_cycle(learn, 30, lr, callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.931872 0.708100 0.636364 00:08
1 0.899296 0.550489 0.727273 00:08
2 0.826373 0.671680 0.740909 00:08
3 0.778929 0.592221 0.804545 00:08
4 0.728600 0.489130 0.813636 00:08
5 0.670522 0.393760 0.818182 00:08
6 0.624613 0.472390 0.809091 00:08
7 0.590188 0.424794 0.850000 00:08
8 0.556790 0.424542 0.813636 00:08
9 0.537160 0.544809 0.827273 00:07
10 0.517562 0.438763 0.822727 00:08
11 0.491368 0.945345 0.695455 00:07
12 0.475939 0.804652 0.754545 00:08
13 0.471815 0.564479 0.804545 00:07
14 0.460127 0.759991 0.790909 00:09
15 0.444690 0.510839 0.877273 00:08
16 0.431266 0.415611 0.877273 00:08
17 0.423021 0.380170 0.863636 00:08
18 0.403673 0.457526 0.859091 00:08
19 0.399507 0.371750 0.886364 00:07
20 0.388753 0.362209 0.881818 00:08
21 0.377531 0.367519 0.886364 00:07
22 0.369749 0.397284 0.881818 00:08
23 0.363795 0.454160 0.877273 00:08
24 0.353920 0.405879 0.854545 00:09
25 0.340398 0.395188 0.886364 00:08
26 0.336158 0.456308 0.872727 00:09
27 0.323833 0.389939 0.881818 00:08
28 0.316578 0.405266 0.886364 00:08
29 0.313389 0.465917 0.881818 00:09
Better model found at epoch 0 with accuracy value: 0.6363636255264282.
Better model found at epoch 1 with accuracy value: 0.7272726893424988.
Better model found at epoch 2 with accuracy value: 0.7409090399742126.
Better model found at epoch 3 with accuracy value: 0.8045454025268555.
Better model found at epoch 4 with accuracy value: 0.8136363625526428.
Better model found at epoch 5 with accuracy value: 0.8181818127632141.
Better model found at epoch 7 with accuracy value: 0.8499999642372131.
Better model found at epoch 15 with accuracy value: 0.8772726655006409.
Better model found at epoch 19 with accuracy value: 0.8863635659217834.
learn.unfreeze()
wd=0.01
lr_find(learn, wd=wd)
learn.recorder.plot()
LR Finder is complete, type {learner_name}.recorder.plot() to see the graph.
best_cb =SaveModelCallback(learn, monitor='accuracy', mode='max', name='phase_adam-2')
fit_one_cycle(learn, 30, slice(7e-5, 8e-4), callbacks=[best_cb], wd=wd)
epoch train_loss valid_loss accuracy time
0 0.338979 0.372603 0.900000 00:09
1 0.318192 0.470078 0.850000 00:08
2 0.334152 0.474656 0.877273 00:08
3 0.327745 0.413054 0.809091 00:09
4 0.331913 1.138486 0.763636 00:09
5 0.339591 1.403044 0.722727 00:08
6 0.336907 0.387383 0.877273 00:09
7 0.332394 0.323987 0.904545 00:09
8 0.325998 0.745306 0.804545 00:08
9 0.329975 1.417567 0.572727 00:08
10 0.330014 0.375960 0.863636 00:08
11 0.321514 0.484275 0.863636 00:08
12 0.318340 0.307178 0.900000 00:08
13 0.311206 0.299710 0.886364 00:08
14 0.301842 0.266632 0.909091 00:08
15 0.300035 0.429969 0.872727 00:08
16 0.301741 0.372586 0.868182 00:09
17 0.294726 0.474646 0.859091 00:09
18 0.293396 0.412457 0.890909 00:08
19 0.286186 0.341138 0.886364 00:08
20 0.273408 0.345530 0.890909 00:08
21 0.265453 0.358420 0.886364 00:08
22 0.260607 0.368266 0.890909 00:08
23 0.252032 0.403910 0.881818 00:09
24 0.244128 0.502213 0.872727 00:10
25 0.236887 0.522091 0.868182 00:07
26 0.235987 0.467629 0.872727 00:08
27 0.230788 0.403328 0.881818 00:08
28 0.220573 0.380378 0.877273 00:09
29 0.216177 0.412985 0.877273 00:08
Better model found at epoch 0 with accuracy value: 0.8999999761581421.
Better model found at epoch 7 with accuracy value: 0.9045454263687134.
Better model found at epoch 14 with accuracy value: 0.9090909361839294.
"change test MRI axis from [n_slices, W, H] to [W, H, n_slices]"
def updateTest(fn, ax=0, dest_ax=-1, key='image'):
    """Move one axis of the array stored under `key` in the .mat file `fn`, in place.

    With the defaults this rewrites a test MRI from [n_slices, W, H]
    (slice axis first) to [W, H, n_slices] (slice axis last).

    Parameters:
        fn: path to the .mat file (rewritten in place).
        ax: current position of the axis to move.
        dest_ax: destination position of that axis.
        key: name of the variable inside the .mat file to update.

    NOTE: the transform is not idempotent — apply it only once per file.
    """
    contents = sio.loadmat(fn)
    contents[key] = np.moveaxis(contents[key], ax, dest_ax)
    sio.savemat(fn, contents)
# Apply it one time !!
# [updateTest(str(f)) for f in test.items]
# defaults.device = 'cpu'
test = MRImageList.from_folder(path/'TestData_LVQuan19', extensions=['.mat'])
# learner = load_learner(path/'train', file='pfinal2.pkl', test=test)
test[5].show()
test
MRImageCtxList (30 items)
MRImage (20, 3, 512, 512),MRImage (20, 3, 512, 512),MRImage (20, 3, 512, 512),MRImage (20, 3, 256, 256),MRImage (20, 3, 512, 512)
Path: /home/projects/data/lvquan/TestData_LVQuan19
preds = learner.get_preds(DatasetType.Test)[0].argmax(dim=-1)
preds.shape
torch.Size([30, 20])
preds[29]
tensor([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
predflats = preds.flatten().cpu().numpy()[None]
subname = 'LVQUAN_IK-LB'
dic = {'lv_phase_hat': predflats}
# dic = sio.loadmat(subname)
dic['lv_phase_hat'] = predflats
dic['lv_phase_hat'].shape
(1, 600)
sio.savemat(subname,dic )
rdic = sio.loadmat(subname)
rdic.keys()
dict_keys(['__header__', '__version__', '__globals__', 'areas_hat', 'dims_hat', 'rwt_hat', 'lv_phase_hat'])
{% endraw %}